page->u.inuse.type_info);
}
-xmem_cache_t *domain_struct_cachep;
-xmem_cache_t *exec_domain_struct_cachep;
-
-void __init domain_startofday(void)
-{
- domain_struct_cachep = xmem_cache_create(
- "domain_cache", sizeof(struct domain),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
- if ( domain_struct_cachep == NULL )
- panic("No slab cache for domain structs.");
-
- exec_domain_struct_cachep = xmem_cache_create(
- "exec_dom_cache", sizeof(struct exec_domain),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
- if ( exec_domain_struct_cachep == NULL )
- BUG();
-}
-
struct domain *arch_alloc_domain_struct(void)
{
-    return xmem_cache_alloc(domain_struct_cachep);
+    /* Dedicated slab cache removed; use the generic type-sized allocator. */
+    return xmalloc(struct domain);
}
void arch_free_domain_struct(struct domain *d)
{
-    xmem_cache_free(domain_struct_cachep, d);
+    /* Matches the xmalloc() in arch_alloc_domain_struct(). */
+    xfree(d);
}
struct exec_domain *arch_alloc_exec_domain_struct(void)
{
-    return xmem_cache_alloc(exec_domain_struct_cachep);
+    /* Dedicated slab cache removed; use the generic type-sized allocator. */
+    return xmalloc(struct exec_domain);
}
void arch_free_exec_domain_struct(struct exec_domain *ed)
{
-    xmem_cache_free(exec_domain_struct_cachep, ed);
+    /* Matches the xmalloc() in arch_alloc_exec_domain_struct(). */
+    xfree(ed);
}
void free_perdomain_pt(struct domain *d)
xmem_cache_init();
xmem_cache_sizes_init(max_page);
- domain_startofday();
-
start_of_day();
grant_table_init();
static void at_dump_cpu_state(int cpu);
-static xmem_cache_t *dom_info_cache;
-
static inline void __add_to_runqueue_head(struct domain *d)
{
list_add(RUNLIST(d), RUNQ(d->processor));
INIT_LIST_HEAD(RUNQ(i));
}
- dom_info_cache = xmem_cache_create("Atropos dom info",
- sizeof(struct at_dom_info),
- 0, 0, NULL, NULL);
-
return 0;
}
/* free memory associated with a task */
static void at_free_task(struct domain *p)
{
-    xmem_cache_free( dom_info_cache, DOM_INFO(p) );
+    /* Per-domain scheduler state now comes from the generic heap. */
+    xfree( DOM_INFO(p) );
}
#define TIME_SLOP (s32)MICROSECS(50) /* allow time to slip a bit */
static s32 ctx_allow = (s32)MILLISECS(5); /* context switch allowance */
-static xmem_cache_t *dom_info_cache;
-
static inline void __add_to_runqueue_head(struct exec_domain *d)
{
list_add(RUNLIST(d), RUNQUEUE(d->processor));
{
    struct domain *d = ed->domain;
    if ( (d->sched_priv == NULL) ) {
-        if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
+        /* Slab caches are gone; allocate per-domain state from the heap.
+         * NB: xmalloc(type), not 'new' — 'new' is not defined in Xen and
+         * every sibling hunk in this patch uses xmalloc(). */
+        if ( (d->sched_priv = xmalloc(struct bvt_dom_info)) == NULL )
            return -1;
        memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
    }
void bvt_free_task(struct domain *d)
{
    ASSERT(d->sched_priv != NULL);
-    xmem_cache_free(dom_info_cache, d->sched_priv);
+    /* Per-domain scheduler state now comes from the generic heap. */
+    xfree(d->sched_priv);
}
/* Control the scheduler. */
CPU_SVT(i) = 0; /* XXX do I really need to do this? */
}
- dom_info_cache = xmem_cache_create(
- "BVT dom info", sizeof(struct bvt_dom_info), 0, 0, NULL, NULL);
- if ( dom_info_cache == NULL )
- {
- printk("BVT: Failed to allocate domain info SLAB cache");
- return -1;
- }
-
return 0;
}
#define RUNLIST(d) ((struct list_head *)&(RR_INFO(d)->run_list))
#define RUNQUEUE(cpu) RUNLIST(schedule_data[cpu].idle)
-static xmem_cache_t *dom_info_cache;
-
static inline void __add_to_runqueue_head(struct domain *d)
{
list_add(RUNLIST(d), RUNQUEUE(d->processor));
for ( i = 0; i < NR_CPUS; i++ )
INIT_LIST_HEAD(RUNQUEUE(i));
- dom_info_cache = xmem_cache_create(
- "RR dom info", sizeof(struct rrobin_dom_info), 0, 0, 0, NULL);
- if ( dom_info_cache == NULL )
- {
- printk("Could not allocate SLAB cache.\n");
- return -1;
- }
-
return 0;
}
-
/* Allocates memory for per domain private scheduling data*/
static int rr_alloc_task(struct domain *d)
{
-    if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
+    /* Fixed: close the xmalloc() call's parenthesis so the pointer — not
+     * the boolean result of the NULL comparison — is assigned, and use
+     * xmalloc(type) like the rest of this patch ('new' does not exist). */
+    if ( (d->sched_priv = xmalloc(struct rrobin_dom_info)) == NULL )
        return -1;
    memset(d->sched_priv, 0, sizeof(struct rrobin_dom_info));
    return 0;
static void rr_free_task(struct domain *d)
{
    ASSERT(d->sched_priv != NULL);
-    xmem_cache_free(dom_info_cache, d->sched_priv);
+    /* Per-domain scheduler state now comes from the generic heap. */
+    xfree(d->sched_priv);
}
/* Initialises idle task */